import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
from PIL import Image
import tensorflow_hub as hub
import os
import logging
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Route TensorFlow's messages through its logger and suppress everything
# below ERROR (hides the INFO/WARNING spam TF emits on import and training).
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# Jupyter magics: render figures inline, at retina (2x) resolution.
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
Load Dataset from Local Image Directories
# Expected layout: input/{train,test}/<class dir>/<image files>, one file
# per sample, so a class's size is just its directory-entry count.
train_dir = "input/train"
test_dir = "input/test"

def _class_size(root, label):
    # Number of files under root/label.
    return len(os.listdir(os.path.join(root, label)))

num_glaucoma = _class_size(train_dir, 'Glaucoma')
num_nonGlaucoma = _class_size(train_dir, 'Non Glaucoma')
print("Training set:")
print(f"Glaucoma={num_glaucoma}")
print(f"Non Glaucoma={num_nonGlaucoma}")

# NOTE(review): test-set directory names are lower-case here while the
# training ones are capitalised — presumably mirrors the actual folders.
print("Testing set:")
print(f"Glaucoma={_class_size(test_dir, 'glaucoma')}")
print(f"Non Glaucoma={_class_size(test_dir, 'non glaucoma')}")

glaucoma_dir = "input/train/Glaucoma"
glaucoma = os.listdir(glaucoma_dir)
Visualizing the Glaucoma Images
# Show the first nine Glaucoma training images in a 3x3 grid.
plt.figure(figsize=(20, 10))
for idx, fname in enumerate(glaucoma[:9], start=1):
    plt.subplot(3, 3, idx)
    plt.imshow(plt.imread(os.path.join(glaucoma_dir, fname)), cmap=plt.cm.binary)
    plt.axis('off')
plt.tight_layout()
Visualizing Non Glaucoma Images
# Same 3x3 grid for the first nine Non Glaucoma training images.
normal_dir = "input/train/Non Glaucoma"
normal = os.listdir(normal_dir)
plt.figure(figsize=(20, 10))
for slot, fname in enumerate(normal[:9], start=1):
    plt.subplot(3, 3, slot)
    image = plt.imread(os.path.join(normal_dir, fname))
    plt.imshow(image, cmap='gray')
    plt.axis('off')
plt.tight_layout()
# Inspect one raw Glaucoma image: intensity colourbar plus array shape.
sample_name = os.listdir("input/train/Glaucoma")[1]
sample_img = plt.imread(os.path.join(glaucoma_dir, sample_name))
plt.imshow(sample_img, cmap='gray')
plt.colorbar()
plt.title('Raw Glaucoma Image')
print(sample_img.shape)
Creating a Pipeline for the Dataset
# Training-time augmentation: random rotations, shifts, shears, zooms and
# horizontal flips help the small fundus-image dataset generalise.
datagen = ImageDataGenerator(
    rotation_range=60,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
)
train_set = datagen.flow_from_directory('input/train/', class_mode='binary',
                                        batch_size=64, target_size=(160, 160))
# BUG FIX: the augmenting generator was also used for the test and
# validation sets, so the model was evaluated on randomly distorted
# images, misstating its real performance. Hold-out data must pass
# through a plain, non-augmenting generator.
eval_datagen = ImageDataGenerator()
# load and iterate validation dataset
test_set = eval_datagen.flow_from_directory('input/test/', class_mode='binary',
                                            batch_size=1, target_size=(160, 160),
                                            shuffle=False)
validation_set = eval_datagen.flow_from_directory('input/random/', class_mode='binary',
                                                  batch_size=1, target_size=(160, 160))
Using the Model from Tensorflow HUB
# MobileNetV2 (alpha=0.35, 160x160 input) from TF-Hub as a frozen feature
# extractor; only the new classification head is trained.
URL = "https://tfhub.dev/google/imagenet/mobilenet_v2_035_160/classification/4"
feature_extract = hub.KerasLayer(URL, input_shape=(160, 160, 3))
feature_extract.trainable = False

# BUG FIX: this is a 2-class problem (Glaucoma / Non Glaucoma) trained with
# sparse integer labels 0/1, so the softmax head needs 2 units — the
# original Dense(8) left six permanently dead outputs in the loss.
mymodel = tf.keras.Sequential([
    feature_extract,
    tf.keras.layers.Dense(2, activation='softmax'),
])
mymodel.summary()
mymodel.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
# Class weights to counter class imbalance. flow_from_directory assigns
# indices alphabetically, so 0 = 'Glaucoma', 1 = 'Non Glaucoma'.
# BUG FIX: each class must be weighted by the OTHER class's share so that
# weight * count is equal for both classes. The original weighted each
# class by its own frequency, which amplifies the majority class instead
# of compensating for it (and mislabelled class 0 as "Normal").
total = num_glaucoma + num_nonGlaucoma
weight_for_0 = num_nonGlaucoma / total  # class 0 = Glaucoma
weight_for_1 = num_glaucoma / total     # class 1 = Non Glaucoma
class_weight = {0: weight_for_0, 1: weight_for_1}
print(f"Weight for class 0 Glaucoma: {weight_for_0:.2f}")
print(f"Weight for class 1 Non Glaucoma: {weight_for_1:.2f}")
TRAINING
EPOCHS = 30
# Stop training once val_loss has not improved for 2 consecutive epochs.
early_stopping = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 2)
# Fit with class weights to offset the Glaucoma / Non Glaucoma imbalance;
# up to EPOCHS epochs, cut short by the early-stopping callback.
history = mymodel.fit(train_set,
epochs = EPOCHS,
validation_data = validation_set,
class_weight = class_weight,
callbacks= [early_stopping]
)
Saving the Model
# Persist the trained model in legacy HDF5 format (single .h5 file).
mymodel.save("glaucoma.h5")
Loading the Saved Model
save_model_path = './glaucoma.h5'
# A model containing a hub.KerasLayer can only be deserialised when that
# layer class is supplied as a custom object.
# (Fixed: the path variable above was defined but unused — the literal
# path was duplicated in the load call; stale commented-out load attempts
# removed.)
new_model = tf.keras.models.load_model(save_model_path,
                                       custom_objects={'KerasLayer': hub.KerasLayer})
new_model.summary()
# Evaluate on the hold-out test generator.
loss, accuracy = new_model.evaluate(test_set)
# Learning curves: loss (top panel) and accuracy (bottom panel), each
# comparing the training run against validation.
panels = [
    (1, [('loss', 'Loss'), ('val_loss', 'Val Loss')],
     'Training Loss Vs Validation Loss'),
    (2, [('accuracy', 'Accuracy'), ('val_accuracy', 'Val Accuracy')],
     'Training Vs Validation Accuracy'),
]
plt.figure(figsize=(12, 8))
for row, series, title in panels:
    plt.subplot(2, 1, row)
    for key, label in series:
        plt.plot(history.history[key], label=label)
    plt.legend()
    plt.title(title)
def process_image(image_numpy):
    """Resize an image array to 160x160 and scale pixel values to [0, 1].

    NOTE(review): the training generator applies no rescaling, so inputs
    preprocessed here are on a different scale than training batches —
    confirm this is intended before trusting predictions.
    """
    tensor = tf.convert_to_tensor(image_numpy, np.float32)
    tensor = tf.image.resize(tensor, (160, 160))
    return (tensor / 255).numpy()
def predictor(image_path, model, top_k=1):
    """Return the top-k class probabilities for the image at *image_path*.

    Parameters:
        image_path: path to an image file readable by PIL.
        model: Keras model taking a (1, 160, 160, 3) batch and returning
            per-class probabilities.
        top_k: number of highest-probability classes to return.

    Returns:
        (probs, classes): parallel lists of the top_k probabilities and
        their integer class indices, sorted by descending probability.
    """
    image = np.asarray(Image.open(image_path))
    batch = np.expand_dims(process_image(image), axis=0)
    predicts = model.predict(batch)
    prob, labels = tf.math.top_k(predicts, k=top_k, sorted=True)
    # BUG FIX: with the default top_k=1, .squeeze() on the (1, 1) results
    # produced 0-d arrays and list() on a 0-d array raises TypeError.
    # np.atleast_1d keeps single results iterable.
    probs = list(np.atleast_1d(prob.numpy().squeeze()))
    classes = list(np.atleast_1d(labels.numpy().squeeze()))
    return probs, classes
# Run the reloaded model on one random-set image and show the image
# titled with its top predicted class name.
image_path = "input/random/glaucoma/glimage51prime.jpg"
orgi_image = np.asarray(Image.open(image_path))
prob, labels = predictor(image_path, new_model, 2)

class_names = ["Glaucoma", "Non Glaucoma"]
topk_classnames = [class_names[lbl] for lbl in labels]

fig, (ax1) = plt.subplots(figsize=(12, 10))
ax1.imshow(orgi_image, cmap=plt.cm.binary)
ax1.axis("off")
ax1.set_title(topk_classnames[0])
plt.tight_layout()
# Shell escape: export this notebook to a standalone HTML report.
!jupyter nbconvert --to html Glaucoma.ipynb